VT-d: Fix free VT-d page table issue
author    Keir Fraser <keir.fraser@citrix.com>
          Wed, 7 May 2008 08:19:40 +0000 (09:19 +0100)
committer Keir Fraser <keir.fraser@citrix.com>
          Wed, 7 May 2008 08:19:40 +0000 (09:19 +0100)
This patch frees the VT-d page tables by walking them down from the pgd,
rather than freeing them according to a guest address range.

This fixes [Bug 1244] "Poweroff/Destroying HVM guest causes HV crash":
http://bugzilla.xensource.com/bugzilla/show_bug.cgi?id=1244
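
For readers unfamiliar with the code, the sketch below models the idea in
isolation: recursively walk a multi-level page table from its top-level
pointer and free every table, instead of computing which tables fall under a
guest address range. It is a minimal standalone model, not the Xen
implementation; the struct pt type, the free_level()/free_all() names and the
plain malloc/free-backed tables are illustrative assumptions, and it omits the
PDE clearing and IOMMU cache flushing that the real patch performs.

    /* Illustrative sketch only -- not the Xen implementation. */
    #include <stdlib.h>

    #define PTE_NUM 512                    /* entries per table: 1 << LEVEL_STRIDE */

    struct pt {
        struct pt *entry[PTE_NUM];         /* non-NULL entry => next-level table */
    };

    /* Recursively free the table at 'level'; level 1 is the leaf level. */
    static void free_level(struct pt *table, int level)
    {
        unsigned long i;

        if ( table == NULL )
            return;

        if ( level > 1 )
            for ( i = 0; i < PTE_NUM; i++ )
                free_level(table->entry[i], level - 1);

        free(table);
    }

    /* Free everything reachable from the top-level ("pgd") pointer. */
    static void free_all(struct pt **pgd, int total_level)
    {
        free_level(*pgd, total_level);
        *pgd = NULL;                       /* mirrors hd->pgd_maddr = 0 in the patch */
    }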

Signed-off-by: Weidong Han <weidong.han@intel.com>
xen/drivers/passthrough/vtd/iommu.c
xen/drivers/passthrough/vtd/iommu.h

xen/drivers/passthrough/vtd/iommu.c
index e1546f270845bcca1f9b06be74b7255c3d5b9fa4..b9f6a5d818db8d468c0ecda0e50197868d49869e 100644
@@ -678,17 +678,63 @@ void dma_pte_free_pagetable(struct domain *domain, u64 start, u64 end)
     }
 }
 
-/* free all VT-d page tables when shut down or destroy domain. */
+static void iommu_free_next_pagetable(u64 pt_maddr, unsigned long index,
+                                      int level)
+{
+    struct acpi_drhd_unit *drhd;
+    unsigned long next_index;
+    struct dma_pte *pt_vaddr, *pde;
+    int next_level;
+
+    if ( pt_maddr == 0 )
+        return;
+
+    pt_vaddr = (struct dma_pte *)map_vtd_domain_page(pt_maddr);
+    pde = &pt_vaddr[index];
+    if ( dma_pte_addr(*pde) != 0 )
+    {
+        next_level = level - 1;
+        if ( next_level > 1 )
+        {
+            next_index = 0;
+            do
+            {
+                iommu_free_next_pagetable(pde->val,
+                                          next_index, next_level);
+                next_index++;
+            } while ( next_index < PTE_NUM );
+        }
+
+        dma_clear_pte(*pde);
+        drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
+        iommu_flush_cache_entry(drhd->iommu, pde);
+        free_pgtable_maddr(pde->val);
+        unmap_vtd_domain_page(pt_vaddr);
+    }
+    else
+        unmap_vtd_domain_page(pt_vaddr);
+}
+
+/* free all VT-d page tables when shut down or destroy domain. */
 static void iommu_free_pagetable(struct domain *domain)
 {
+    unsigned long index;
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
-    int addr_width = agaw_to_width(hd->agaw);
-    u64 start, end;
+    int total_level = agaw_to_level(hd->agaw);
 
-    start = 0;
-    end = (((u64)1) << addr_width) - 1;
+    if ( hd->pgd_maddr != 0 )
+    {
+        index = 0;
+        do
+        {
+            iommu_free_next_pagetable(hd->pgd_maddr,
+                                      index, total_level + 1);
+            index++;
+        } while ( index < PTE_NUM );
 
-    dma_pte_free_pagetable(domain, start, end);
+        free_pgtable_maddr(hd->pgd_maddr);
+        hd->pgd_maddr = 0;
+    }
 }
 
 static int iommu_set_root_entry(struct iommu *iommu)
xen/drivers/passthrough/vtd/iommu.h
index 56bc550a4f7f981084eacb7c8783f94f87ef2d3b..c6ddf7849fc5a2c165bf839a359674fa82f2aa6d 100644
@@ -235,6 +235,7 @@ struct context_entry {
 /* page table handling */
 #define LEVEL_STRIDE       (9)
 #define LEVEL_MASK         ((1 << LEVEL_STRIDE) - 1)
+#define PTE_NUM            (1 << LEVEL_STRIDE)
 #define agaw_to_level(val) ((val) + 2)
 #define agaw_to_width(val) (30 + val * LEVEL_STRIDE)
 #define width_to_agaw(w)   ((w - 30)/LEVEL_STRIDE)